Implementation of the U-Net fully convolutional neural network

A full basic implementation following the paper:
"U-Net: Convolutional Networks for Biomedical Image Segmentation"
https://arxiv.org/pdf/1505.04597.pdf

In [78]:
import datetime
import pathlib
import sys
import random

import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display

from PIL import Image, ImageDraw

from sklearn.model_selection import train_test_split


%load_ext autoreload
%autoreload 2

Image.MAX_IMAGE_PIXELS = None
The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload
In [2]:
SEED=241
random.seed(SEED)
np.random.seed(SEED)
In [3]:
plt.rcParams["figure.figsize"] = [26,19]

1 Create dataset

In [4]:
def load_map_and_mask(map_file, mask_file, workspace_dir):
    """Load a map image and rasterize its polygon markup into a binary mask.

    Args:
        map_file: path to the source map image (any PIL-readable format).
        mask_file: path to the markup text file; each non-empty line is one
            polygon given as space-separated "x,y" vertex pairs.
        workspace_dir: pathlib.Path of a directory where a JPEG preview of
            the rendered mask is saved (named after the markup file's stem).

    Returns:
        (map_img, mask_img): the opened map image and a mode-'1' PIL image
        of the same size with polygon interiors filled with 1.
    """
    map_img = Image.open(str(map_file))

    polygons = []

    with open(str(mask_file)) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines: int('') would raise ValueError otherwise.
                continue
            points = line.split(' ')
            polygon = [(int(xy[0]), int(xy[1])) for xy in [point.split(',') for point in points]]
            polygons.append(polygon)

    mask_img = Image.new('1', map_img.size, 0)

    # Create the drawing context once instead of once per polygon.
    draw = ImageDraw.Draw(mask_img)
    for p in polygons:
        draw.polygon(p, fill=1)

    mask_img.convert('RGB').save(str(workspace_dir/(mask_file.stem + '.jpg')), format='JPEG', quality=100)

    return map_img, mask_img

def plot_masks(map_imgs, mask_imgs):
    """Display each map next to its mask in a two-column grid of subplots."""
    n_rows = len(map_imgs)
    n_cols = 2

    for row in range(n_rows):
        # Left column: the map image.
        plt.subplot(n_rows, n_cols, n_cols * row + 1)
        plt.imshow(map_imgs[row])

        # Right column: the corresponding mask.
        plt.subplot(n_rows, n_cols, n_cols * row + 2)
        plt.imshow(mask_imgs[row])

    plt.show()

def map_stats(map_img, mask_img):
    """Print the map size and the fraction of 0 / 1 pixels in its mask."""
    mask = np.array(mask_img).astype(np.byte)
    zeros = np.sum(mask == 0)
    ones = np.sum(mask == 1)
    total = zeros + ones

    print(map_img.size)
    print('zeros ratio', round(zeros / total, 3) if zeros != 0 else 0)
    print('ones ratio', round(ones / total, 3) if ones != 0 else 0)
In [6]:
dataset_dir = pathlib.Path().cwd() / 'external_data' / 'data' / 'train'
src_images = list(dataset_dir.glob('**/*.tif'))
src_images = [img_p for img_p in src_images if '.mask.' not in img_p.name]

print('src_images', len(src_images))
src_images 13
In [7]:
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
workspace_dir = pathlib.Path().cwd() / ('workspace_' + str(timestamp))
workspace_dir.mkdir(parents=True)

1.1 Read source Map images

In [8]:
src_images_and_masks = []

for img_p in src_images:
    map_img, mask = load_map_and_mask(img_p, img_p.parent/(img_p.stem + '.markup.txt'), workspace_dir) 
    src_images_and_masks.append({
        'map': map_img,
        'mask': mask
    })
    
    print('Stats for', img_p)
    map_stats(map_img, mask)
    print()
Stats for /home/sergey_sintsov/dubai/external_data/data/train/1/1.tif
(14475, 9834)
zeros ratio 0.866
ones ratio 0.134

Stats for /home/sergey_sintsov/dubai/external_data/data/train/10/10.tif
(15544, 9834)
zeros ratio 0.342
ones ratio 0.658

Stats for /home/sergey_sintsov/dubai/external_data/data/train/11/11.tif
(15544, 9834)
zeros ratio 0.428
ones ratio 0.572

Stats for /home/sergey_sintsov/dubai/external_data/data/train/2/2.tif
(14475, 9834)
zeros ratio 0.989
ones ratio 0.011

Stats for /home/sergey_sintsov/dubai/external_data/data/train/3/3.tif
(10088, 7022)
zeros ratio 0.309
ones ratio 0.691

Stats for /home/sergey_sintsov/dubai/external_data/data/train/4/4.tif
(15403, 9834)
zeros ratio 0.447
ones ratio 0.553

Stats for /home/sergey_sintsov/dubai/external_data/data/train/5/5.tif
(13988, 9834)
zeros ratio 0.612
ones ratio 0.388

Stats for /home/sergey_sintsov/dubai/external_data/data/train/6/6.tif
(15544, 9834)
zeros ratio 0.571
ones ratio 0.429

Stats for /home/sergey_sintsov/dubai/external_data/data/train/7/7.tif
(15544, 9834)
zeros ratio 0.514
ones ratio 0.486

Stats for /home/sergey_sintsov/dubai/external_data/data/train/8/8.tif
(15544, 9834)
zeros ratio 0.274
ones ratio 0.726

Stats for /home/sergey_sintsov/dubai/external_data/data/train/9/9.tif
(15544, 9834)
zeros ratio 0.89
ones ratio 0.11

Stats for /home/sergey_sintsov/dubai/external_data/data/train/demo-sample/demo-sample.tif
(14475, 9834)
zeros ratio 0.697
ones ratio 0.303

Stats for /home/sergey_sintsov/dubai/external_data/data/train/sample/sample.tif
(16083, 10927)
zeros ratio 0.257
ones ratio 0.743

In [9]:
plot_masks(
    [src['map'] for src in src_images_and_masks][:2], 
    [src['mask'] for src in src_images_and_masks][:2]
)

1.2 Create dataset images by cropping source Maps

In [72]:
def cut_map_into_tiles(map_img, 
                       mask_img, 
                       tile_size=100, 
                       tile_resize=100, 
                       tiles_count=100,
                       tile_prefix='',
                       save_tiles=True,
                       save_dir=None):
    """Sample random square tiles from a map image and its mask.

    Args:
        map_img: source PIL image (converted to RGB before cropping).
        mask_img: mask PIL image of the same size as ``map_img``.
        tile_size: side length, in source pixels, of each cropped tile.
        tile_resize: side length the tile/mask pair is resized to.
        tiles_count: number of random tiles to sample.
        tile_prefix: string prepended to saved tile file names.
        save_tiles: when True, each tile/mask array is saved via np.save.
        save_dir: pathlib.Path directory for saved tiles; required when
            ``save_tiles`` is True.

    Returns:
        (X, Y): lists of ``tiles_count`` RGB tile arrays and byte mask arrays.

    Raises:
        ValueError: if ``save_tiles`` is True but ``save_dir`` is None
            (previously this crashed later with an opaque TypeError).
    """
    if save_tiles and save_dir is None:
        raise ValueError('save_dir must be provided when save_tiles is True')

    X = []
    Y = []

    width, height = map_img.size

    # Random top-left corners chosen so every tile fits fully inside the map.
    top_left_coordinates = zip(
        np.random.randint(0, width - tile_size, tiles_count), 
        np.random.randint(0, height - tile_size, tiles_count)
    )

    map_img_in_rgb = map_img.convert('RGB')
    
    for i, (x, y) in enumerate(top_left_coordinates):
        box = (x, y, x + tile_size, y + tile_size)
        tile = map_img_in_rgb.crop(box)
        tile_mask = mask_img.crop(box)

        tile = tile.resize((tile_resize, tile_resize))
        tile_mask = tile_mask.resize((tile_resize, tile_resize))

        mp = np.array(tile)
        mask = np.array(tile_mask).astype(np.byte)

        X.append(mp)
        Y.append(mask)

        if save_tiles:
            # NOTE: np.save appends '.npy', so files end up as e.g. '0map_3.np.npy'.
            np.save(str(save_dir/(tile_prefix + 'map_' + str(i) + '.np')), mp)
            np.save(str(save_dir/(tile_prefix + 'mask_' + str(i) + '.np')), mask)

    return X, Y

def tiles_stats(Y):
    """Print aggregate counts and ratios of 0/1 pixels over a list of tile masks."""
    zeros_count = 0
    ones_count = 0
    for mask in Y:
        zeros_count = zeros_count + np.sum(mask == 0)
        ones_count = ones_count + np.sum(mask == 1)

    total = ones_count + zeros_count

    print('zeros', zeros_count)
    print('ones', ones_count)

    # Ratios are only printed when the corresponding count is non-zero,
    # which also guards against dividing by a zero total.
    if zeros_count > 0:
        print('zeros ratio', zeros_count / total)
    if ones_count > 0:
        print('ones ratio', ones_count / total)
    print()
In [12]:
TILES_SIZE = 1024
TILES_COUNT = 1000
UNET_INPUT_SIZE = 256

X = []
Y = []

tiles_folder = workspace_dir / 'train_tiles'
tiles_folder.mkdir(parents=True, exist_ok=True)
    
for i, src in enumerate(src_images_and_masks):
    x, y = cut_map_into_tiles(
        src['map'], 
        src['mask'], 
        tile_size=TILES_SIZE,
        tile_resize=UNET_INPUT_SIZE,
        tiles_count=TILES_COUNT,
        tile_prefix=str(i),
        save_dir=tiles_folder
    )
    
    X += x
    Y += y
    print('done', i)
    
print('X', len(X))
print('Y', len(Y))
tiles_stats(Y)
done 0
done 1
done 2
done 3
done 4
done 5
done 6
done 7
done 8
done 9
done 10
done 11
done 12
X 13000
Y 13000
zeros 478009412
ones 373958588
zeros ratio 0.5610649836613582
ones ratio 0.43893501633864185

In [13]:
X = np.array(X)
Y = np.array(Y)[...,np.newaxis]

print(X.shape, Y.shape)

np.save(str(workspace_dir/'X.np'), X)
np.save(str(workspace_dir/'Y.np'), Y)
(13000, 256, 256, 3) (13000, 256, 256, 1)

1.3 View cropped images

In [14]:
def binary_mask_to_img(data):
    """Convert a 2-D 0/1 numpy array into a PIL mode-'1' (bilevel) image.

    PIL sizes are (width, height) while numpy shapes are (rows, cols), hence
    the reversed shape. ``Image.frombytes(mode='1', ...)`` consumes pixels
    packed 8 per byte, so the array is bit-packed along axis=1 (within each
    row) first.

    NOTE(review): assumes ``data`` is a 2-D array of 0/1 values; rows whose
    length is not a multiple of 8 are zero-padded by ``np.packbits``, which
    presumably matches frombytes' row-padding expectation — confirm for
    non-multiple-of-8 widths.
    """
    size = data.shape[::-1]
    databytes = np.packbits(data, axis=1)
    
    return Image.frombytes(mode='1', size=size, data=databytes)
In [15]:
# Show train data
show_count = 4
train_maps = [Image.fromarray(x.astype('uint8'), 'RGB') for x in X[:show_count]]
train_masks = [binary_mask_to_img(y[:,:,-1]) for y in Y[:show_count]]

plt.rcParams["figure.figsize"] = [50,50]
plot_masks(train_maps, train_masks)

2. Create and train U-Net model

In [16]:
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.keras import backend as K
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import (ModelCheckpoint, LearningRateScheduler, ModelCheckpoint, EarlyStopping, 
                                        ReduceLROnPlateau, TensorBoard, TerminateOnNaN, Callback)
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.image import ImageDataGenerator
In [17]:
tf.__version__
Out[17]:
'1.11.0'
In [18]:
def get_available_gpus():
    """Return the device names of all GPUs visible to TensorFlow."""
    gpu_names = []
    for device in device_lib.list_local_devices():
        if device.device_type == 'GPU':
            gpu_names.append(device.name)
    return gpu_names

get_available_gpus()
Out[18]:
['/device:GPU:0']

2.1 Create U-net

In [38]:
def create_unet(input_sz=512):
    """Build a U-Net segmentation model ("U-Net: Convolutional Networks for
    Biomedical Image Segmentation", https://arxiv.org/abs/1505.04597).

    Unlike the paper, convolutions use ``padding='same'`` so feature maps keep
    their spatial size and the output mask matches the input resolution.

    Args:
        input_sz: side length of the square RGB input; must be divisible by 16
            (four 2x2 poolings) so skip connections line up.

    Returns:
        Uncompiled keras Model mapping (input_sz, input_sz, 3) images to
        (input_sz, input_sz, 1) per-pixel sigmoid probabilities.
    """
    def _double_conv(x, filters):
        # One U-Net stage: two successive 3x3 conv + ReLU layers.
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
        return x

    def _up_concat(x, skip, filters):
        # Upsample 2x, reduce channels with a 2x2 conv, then concatenate
        # the skip connection from the contracting path.
        x = UpSampling2D(size=(2, 2))(x)
        x = Conv2D(filters, 2, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
        return concatenate([skip, x], axis=3)

    image_input = Input((input_sz, input_sz, 3))

    # Contracting path (down-sampling).
    down1 = _double_conv(image_input, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(down1)
    down2 = _double_conv(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(down2)
    down3 = _double_conv(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(down3)
    down4 = _double_conv(pool3, 512)
    pool4 = MaxPooling2D(pool_size=(2, 2))(down4)

    bottleneck = _double_conv(pool4, 1024)

    # Expansive path (up-sampling) with skip connections.
    up4 = _double_conv(_up_concat(bottleneck, down4, 512), 512)
    up3 = _double_conv(_up_concat(up4, down3, 256), 256)
    up2 = _double_conv(_up_concat(up3, down2, 128), 128)
    up1 = _double_conv(_up_concat(up2, down1, 64), 64)
    up1 = SpatialDropout2D(0.2)(up1)

    # 1x1 convolution + sigmoid -> per-pixel foreground probability.
    # (The original passed strides=1 positionally, which is the default.)
    output = Conv2D(1, 1, activation='sigmoid')(up1)

    return Model(inputs=[image_input], outputs=output)
In [64]:
unet = create_unet(input_sz=UNET_INPUT_SIZE)
unet.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_3 (InputLayer)            (None, 256, 256, 3)  0                                            
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 256, 256, 32) 896         input_3[0][0]                    
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 256, 256, 32) 9248        conv2d_48[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_8 (MaxPooling2D)  (None, 128, 128, 32) 0           conv2d_49[0][0]                  
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 128, 128, 64) 18496       max_pooling2d_8[0][0]            
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 128, 128, 64) 36928       conv2d_50[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_9 (MaxPooling2D)  (None, 64, 64, 64)   0           conv2d_51[0][0]                  
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 64, 64, 128)  73856       max_pooling2d_9[0][0]            
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 64, 64, 128)  147584      conv2d_52[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_10 (MaxPooling2D) (None, 32, 32, 128)  0           conv2d_53[0][0]                  
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 32, 32, 256)  295168      max_pooling2d_10[0][0]           
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 32, 32, 256)  590080      conv2d_54[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_11 (MaxPooling2D) (None, 16, 16, 256)  0           conv2d_55[0][0]                  
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 16, 16, 512)  1180160     max_pooling2d_11[0][0]           
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 16, 16, 512)  2359808     conv2d_56[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_8 (UpSampling2D)  (None, 32, 32, 512)  0           conv2d_57[0][0]                  
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 32, 32, 256)  524544      up_sampling2d_8[0][0]            
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 32, 32, 512)  0           conv2d_55[0][0]                  
                                                                 conv2d_58[0][0]                  
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 32, 32, 256)  1179904     concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 32, 32, 256)  590080      conv2d_59[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_9 (UpSampling2D)  (None, 64, 64, 256)  0           conv2d_60[0][0]                  
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 64, 64, 128)  131200      up_sampling2d_9[0][0]            
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 64, 64, 256)  0           conv2d_53[0][0]                  
                                                                 conv2d_61[0][0]                  
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 64, 64, 128)  295040      concatenate_9[0][0]              
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 64, 64, 128)  147584      conv2d_62[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_10 (UpSampling2D) (None, 128, 128, 128 0           conv2d_63[0][0]                  
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 128, 128, 64) 32832       up_sampling2d_10[0][0]           
__________________________________________________________________________________________________
concatenate_10 (Concatenate)    (None, 128, 128, 128 0           conv2d_51[0][0]                  
                                                                 conv2d_64[0][0]                  
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 128, 128, 64) 73792       concatenate_10[0][0]             
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 128, 128, 64) 36928       conv2d_65[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_11 (UpSampling2D) (None, 256, 256, 64) 0           conv2d_66[0][0]                  
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 256, 256, 32) 8224        up_sampling2d_11[0][0]           
__________________________________________________________________________________________________
concatenate_11 (Concatenate)    (None, 256, 256, 64) 0           conv2d_49[0][0]                  
                                                                 conv2d_67[0][0]                  
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 256, 256, 32) 18464       concatenate_11[0][0]             
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 256, 256, 32) 9248        conv2d_68[0][0]                  
__________________________________________________________________________________________________
spatial_dropout2d_2 (SpatialDro (None, 256, 256, 32) 0           conv2d_69[0][0]                  
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 256, 256, 1)  289         spatial_dropout2d_2[0][0]        
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 256, 256, 1)  2           conv2d_70[0][0]                  
==================================================================================================
Total params: 7,760,355
Trainable params: 7,760,355
Non-trainable params: 0
__________________________________________________________________________________________________

2.2 Train U-net

In [40]:
def preprocess_inputs(X):
    """Linearly rescale pixel values from [0, 255] to [-1.0, 1.0]."""
    scale = 2.0 / 255.0
    return scale * X - 1.0
In [ ]:
X_preprocessed = preprocess_inputs(X)
In [65]:
# NOTE(review): this imports the loss from standalone `keras`, while the model
# itself is built with `tensorflow.keras` — mixing the two APIs is a known
# source of compile/tensor-type errors; presumably
# `tensorflow.keras.losses.binary_crossentropy` was intended. TODO confirm.
from keras.losses import binary_crossentropy

# Additive smoothing term used by jaccard_score to avoid division by zero.
SMOOTH = 1

def jaccard_score(gt, pr, smooth=SMOOTH, threshold=None):
    """Compute the IoU / Jaccard index between ground truth and prediction.

    https://en.wikipedia.org/wiki/Jaccard_index

    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        smooth: value added to numerator and denominator to avoid division
            by zero
        threshold: if not None, predictions are binarized with a strict ``>``
            comparison before scoring; otherwise raw values are used

    Returns:
        IoU/Jaccard score in range [0, 1], averaged over the batch axis
    """
    # Reduce over the spatial dimensions only, keeping batch and channels.
    spatial_axes = [1, 2]

    if threshold is not None:
        pr = K.cast(K.greater(pr, threshold), K.floatx())

    intersection = K.sum(gt * pr, axis=spatial_axes)
    union = K.sum(gt + pr, axis=spatial_axes) - intersection

    iou = (intersection + smooth) / (union + smooth)
    return K.mean(iou, axis=0)
In [66]:
def create_default_callbacks(workspace_dir, batch_sz=1):
    """Build the standard training callback set for this project.

    Creates a fresh timestamped checkpoint directory and TensorBoard log
    directory under ``workspace_dir``.

    Args:
        workspace_dir: pathlib.Path under which run artifacts are written.
        batch_sz: batch size reported to TensorBoard.

    Returns:
        (callbacks, checkpoint_folder): the callback list and the Path of the
        directory where model checkpoints are written.
    """
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    checkpoint_folder = workspace_dir / 'checkpoints' / str(timestamp)
    checkpoint_folder.mkdir(parents=True)
    tensorboard_folder = workspace_dir / 'tensorboard_logs' / str(timestamp)

    # Save weights only when the (training) loss improves, one file per epoch.
    checkpoint = ModelCheckpoint(
        str(checkpoint_folder / 'model-{loss:.2f}.h5'),
        monitor='loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        mode='auto',
        period=1
    )

    # BUG FIX: an EarlyStopping callback (monitor='loss', patience=200) used
    # to be constructed here but was never added to the returned list, so it
    # had no effect; the dead local has been removed. TODO: decide whether
    # early stopping should actually be registered.
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, min_lr=1e-9, verbose=1, mode='min')

    tensorboard = TensorBoard(log_dir=str(tensorboard_folder),
                              histogram_freq=0,
                              batch_size=batch_sz,
                              write_graph=False, 
                              write_grads=False, 
                              write_images=False,
                              embeddings_freq=0, 
                              embeddings_layer_names=None, 
                              embeddings_metadata=None, 
                              embeddings_data=None)

    return [reduce_lr, TerminateOnNaN(), checkpoint, tensorboard], checkpoint_folder


def train(model, X_train, Y_train, workspace_dir, epochs=1, batch_sz=1):
    """Compile ``model`` and fit it on (X_train, Y_train).

    Writes checkpoints/TensorBoard logs under ``workspace_dir`` and saves the
    model architecture as JSON next to the checkpoints so the network can be
    reconstructed without re-running the notebook.

    Args:
        model: uncompiled keras Model to train.
        X_train: preprocessed input array, shape (N, H, W, 3).
        Y_train: binary mask array, shape (N, H, W, 1).
        workspace_dir: pathlib.Path for run artifacts.
        epochs: number of training epochs.
        batch_sz: mini-batch size.

    Returns:
        The keras ``History`` object from ``model.fit``.
    """
    #sgd = SGD(lr=0.01, decay=1e-6, momentum=0.99, nesterov=True)

    # BUG FIX: previously this compiled the global `unet` instead of the
    # `model` argument, so passing any other model would silently compile
    # and configure the wrong network.
    model.compile(
        #optimizer=sgd, 
        optimizer='Adam',
        loss=binary_crossentropy,
        metrics=[jaccard_score, 'binary_accuracy']
    )

    callbacks, checkpoint_dir = create_default_callbacks(workspace_dir, batch_sz=batch_sz)

    model_json = model.to_json()
    with open(str(checkpoint_dir/'graph.json'), 'w') as json_file:
        json_file.write(model_json)

    return model.fit(
        X_train, Y_train,
        batch_size=batch_sz,
        epochs=epochs,
        callbacks=callbacks,
        shuffle=True
    )
In [68]:
train(unet, X_preprocessed, Y, workspace_dir, epochs=700, batch_sz=16)
Epoch 1/700
12992/13000 [============================>.] - ETA: 0s - loss: 7.1284 - jaccard_score: 0.3156 - binary_accuracy: 0.8193
Epoch 00001: loss improved from inf to 7.12597, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-7.13.h5
13000/13000 [==============================] - 187s 14ms/step - loss: 7.1260 - jaccard_score: 0.3157 - binary_accuracy: 0.8193
Epoch 2/700
12992/13000 [============================>.] - ETA: 0s - loss: 4.8729 - jaccard_score: 0.4059 - binary_accuracy: 0.9033
Epoch 00002: loss improved from 7.12597 to 4.87104, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-4.87.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 4.8710 - jaccard_score: 0.4059 - binary_accuracy: 0.9033
Epoch 3/700
12992/13000 [============================>.] - ETA: 0s - loss: 3.8339 - jaccard_score: 0.4628 - binary_accuracy: 0.9235
Epoch 00003: loss improved from 4.87104 to 3.83294, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-3.83.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 3.8329 - jaccard_score: 0.4628 - binary_accuracy: 0.9235
Epoch 4/700
12992/13000 [============================>.] - ETA: 0s - loss: 3.0799 - jaccard_score: 0.5066 - binary_accuracy: 0.9378
Epoch 00004: loss improved from 3.83294 to 3.07903, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-3.08.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 3.0790 - jaccard_score: 0.5067 - binary_accuracy: 0.9378
Epoch 5/700
12992/13000 [============================>.] - ETA: 0s - loss: 2.6208 - jaccard_score: 0.5406 - binary_accuracy: 0.9458
Epoch 00005: loss improved from 3.07903 to 2.61973, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-2.62.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 2.6197 - jaccard_score: 0.5407 - binary_accuracy: 0.9458
Epoch 6/700
12992/13000 [============================>.] - ETA: 0s - loss: 2.3761 - jaccard_score: 0.5619 - binary_accuracy: 0.9491
Epoch 00006: loss improved from 2.61973 to 2.37540, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-2.38.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 2.3754 - jaccard_score: 0.5618 - binary_accuracy: 0.9491
Epoch 7/700
12992/13000 [============================>.] - ETA: 0s - loss: 2.1129 - jaccard_score: 0.5835 - binary_accuracy: 0.9543
Epoch 00007: loss improved from 2.37540 to 2.11230, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-2.11.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 2.1123 - jaccard_score: 0.5837 - binary_accuracy: 0.9543
Epoch 8/700
12992/13000 [============================>.] - ETA: 0s - loss: 2.0170 - jaccard_score: 0.5941 - binary_accuracy: 0.9556
Epoch 00008: loss improved from 2.11230 to 2.01653, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-2.02.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 2.0165 - jaccard_score: 0.5941 - binary_accuracy: 0.9556
Epoch 9/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.6151 - jaccard_score: 0.6244 - binary_accuracy: 0.9650
Epoch 00009: loss improved from 2.01653 to 1.61463, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-1.61.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 1.6146 - jaccard_score: 0.6244 - binary_accuracy: 0.9650
Epoch 10/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.5155 - jaccard_score: 0.6350 - binary_accuracy: 0.9666
Epoch 00010: loss improved from 1.61463 to 1.51493, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-1.51.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 1.5149 - jaccard_score: 0.6349 - binary_accuracy: 0.9666
Epoch 11/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.4120 - jaccard_score: 0.6484 - binary_accuracy: 0.9687
Epoch 00011: loss improved from 1.51493 to 1.41150, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-1.41.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 1.4115 - jaccard_score: 0.6485 - binary_accuracy: 0.9687
Epoch 12/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.2423 - jaccard_score: 0.6632 - binary_accuracy: 0.9723
Epoch 00012: loss improved from 1.41150 to 1.24184, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-1.24.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 1.2418 - jaccard_score: 0.6633 - binary_accuracy: 0.9723
Epoch 13/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.1487 - jaccard_score: 0.6740 - binary_accuracy: 0.9740
Epoch 00013: loss improved from 1.24184 to 1.14829, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-1.15.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 1.1483 - jaccard_score: 0.6739 - binary_accuracy: 0.9740
Epoch 14/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.2285 - jaccard_score: 0.6670 - binary_accuracy: 0.9719
Epoch 00014: loss did not improve from 1.14829
13000/13000 [==============================] - 180s 14ms/step - loss: 1.2282 - jaccard_score: 0.6671 - binary_accuracy: 0.9719
Epoch 15/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.9334 - jaccard_score: 0.6966 - binary_accuracy: 0.9787
Epoch 00015: loss improved from 1.14829 to 0.93316, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.93.h5
13000/13000 [==============================] - 179s 14ms/step - loss: 0.9332 - jaccard_score: 0.6966 - binary_accuracy: 0.9787
Epoch 16/700
12992/13000 [============================>.] - ETA: 0s - loss: 1.0423 - jaccard_score: 0.6869 - binary_accuracy: 0.9760
Epoch 00016: loss did not improve from 0.93316
13000/13000 [==============================] - 180s 14ms/step - loss: 1.0419 - jaccard_score: 0.6870 - binary_accuracy: 0.9760
Epoch 17/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.8522 - jaccard_score: 0.7063 - binary_accuracy: 0.9803
Epoch 00017: loss improved from 0.93316 to 0.85181, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.85.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.8518 - jaccard_score: 0.7062 - binary_accuracy: 0.9803
Epoch 18/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.9322 - jaccard_score: 0.7014 - binary_accuracy: 0.9784
Epoch 00018: loss did not improve from 0.85181
13000/13000 [==============================] - 180s 14ms/step - loss: 0.9319 - jaccard_score: 0.7014 - binary_accuracy: 0.9784
Epoch 19/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.9385 - jaccard_score: 0.7015 - binary_accuracy: 0.9782
Epoch 00019: loss did not improve from 0.85181
13000/13000 [==============================] - 180s 14ms/step - loss: 0.9382 - jaccard_score: 0.7014 - binary_accuracy: 0.9782
Epoch 20/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7731 - jaccard_score: 0.7166 - binary_accuracy: 0.9818
Epoch 00020: loss improved from 0.85181 to 0.77281, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.77.h5
13000/13000 [==============================] - 179s 14ms/step - loss: 0.7728 - jaccard_score: 0.7167 - binary_accuracy: 0.9818
Epoch 21/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.9064 - jaccard_score: 0.7024 - binary_accuracy: 0.9787
Epoch 00021: loss did not improve from 0.77281
13000/13000 [==============================] - 180s 14ms/step - loss: 0.9060 - jaccard_score: 0.7024 - binary_accuracy: 0.9787
Epoch 22/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7675 - jaccard_score: 0.7145 - binary_accuracy: 0.9818
Epoch 00022: loss improved from 0.77281 to 0.76730, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.77.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.7673 - jaccard_score: 0.7146 - binary_accuracy: 0.9818
Epoch 23/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7394 - jaccard_score: 0.7238 - binary_accuracy: 0.9825
Epoch 00023: loss improved from 0.76730 to 0.73912, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.74.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.7391 - jaccard_score: 0.7237 - binary_accuracy: 0.9825
Epoch 24/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.6747 - jaccard_score: 0.7342 - binary_accuracy: 0.9838
Epoch 00024: loss improved from 0.73912 to 0.67460, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.67.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.6746 - jaccard_score: 0.7342 - binary_accuracy: 0.9838
Epoch 25/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.8462 - jaccard_score: 0.7123 - binary_accuracy: 0.9799
Epoch 00025: loss did not improve from 0.67460
13000/13000 [==============================] - 181s 14ms/step - loss: 0.8460 - jaccard_score: 0.7123 - binary_accuracy: 0.9799
Epoch 26/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.6712 - jaccard_score: 0.7341 - binary_accuracy: 0.9839
Epoch 00026: loss improved from 0.67460 to 0.67094, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.67.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.6709 - jaccard_score: 0.7341 - binary_accuracy: 0.9839
Epoch 27/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7041 - jaccard_score: 0.7320 - binary_accuracy: 0.9831
Epoch 00027: loss did not improve from 0.67094
13000/13000 [==============================] - 180s 14ms/step - loss: 0.7040 - jaccard_score: 0.7321 - binary_accuracy: 0.9831
Epoch 28/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5929 - jaccard_score: 0.7516 - binary_accuracy: 0.9856
Epoch 00028: loss improved from 0.67094 to 0.59266, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.59.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5927 - jaccard_score: 0.7516 - binary_accuracy: 0.9856
Epoch 29/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.8573 - jaccard_score: 0.7225 - binary_accuracy: 0.9797
Epoch 00029: loss did not improve from 0.59266
13000/13000 [==============================] - 180s 14ms/step - loss: 0.8570 - jaccard_score: 0.7224 - binary_accuracy: 0.9797
Epoch 30/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.6984 - jaccard_score: 0.7295 - binary_accuracy: 0.9831
Epoch 00030: loss did not improve from 0.59266
13000/13000 [==============================] - 180s 14ms/step - loss: 0.6982 - jaccard_score: 0.7295 - binary_accuracy: 0.9831
Epoch 31/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5523 - jaccard_score: 0.7570 - binary_accuracy: 0.9864
Epoch 00031: loss improved from 0.59266 to 0.55215, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.55.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5521 - jaccard_score: 0.7572 - binary_accuracy: 0.9864
Epoch 32/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5327 - jaccard_score: 0.7614 - binary_accuracy: 0.9869
Epoch 00032: loss improved from 0.55215 to 0.53259, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.53.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.5326 - jaccard_score: 0.7615 - binary_accuracy: 0.9869
Epoch 33/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7867 - jaccard_score: 0.7151 - binary_accuracy: 0.9811
Epoch 00033: loss did not improve from 0.53259
13000/13000 [==============================] - 181s 14ms/step - loss: 0.7864 - jaccard_score: 0.7152 - binary_accuracy: 0.9811
Epoch 34/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5911 - jaccard_score: 0.7485 - binary_accuracy: 0.9855
Epoch 00034: loss did not improve from 0.53259
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5909 - jaccard_score: 0.7484 - binary_accuracy: 0.9855
Epoch 35/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.6188 - jaccard_score: 0.7449 - binary_accuracy: 0.9849
Epoch 00035: loss did not improve from 0.53259
13000/13000 [==============================] - 180s 14ms/step - loss: 0.6185 - jaccard_score: 0.7449 - binary_accuracy: 0.9849
Epoch 36/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5372 - jaccard_score: 0.7598 - binary_accuracy: 0.9868
Epoch 00036: loss did not improve from 0.53259
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5370 - jaccard_score: 0.7597 - binary_accuracy: 0.9868
Epoch 37/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5212 - jaccard_score: 0.7624 - binary_accuracy: 0.9871
Epoch 00037: loss improved from 0.53259 to 0.52111, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.52.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5211 - jaccard_score: 0.7624 - binary_accuracy: 0.9871
Epoch 38/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.7977 - jaccard_score: 0.7247 - binary_accuracy: 0.9810
Epoch 00038: loss did not improve from 0.52111
13000/13000 [==============================] - 179s 14ms/step - loss: 0.7973 - jaccard_score: 0.7246 - binary_accuracy: 0.9811
Epoch 39/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5488 - jaccard_score: 0.7513 - binary_accuracy: 0.9864
Epoch 00039: loss did not improve from 0.52111
13000/13000 [==============================] - 179s 14ms/step - loss: 0.5487 - jaccard_score: 0.7513 - binary_accuracy: 0.9864
Epoch 40/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5028 - jaccard_score: 0.7652 - binary_accuracy: 0.9875
Epoch 00040: loss improved from 0.52111 to 0.50259, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.50.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5026 - jaccard_score: 0.7649 - binary_accuracy: 0.9875
Epoch 41/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4722 - jaccard_score: 0.7756 - binary_accuracy: 0.9882
Epoch 00041: loss improved from 0.50259 to 0.47203, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.47.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4720 - jaccard_score: 0.7756 - binary_accuracy: 0.9882
Epoch 42/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.8172 - jaccard_score: 0.7152 - binary_accuracy: 0.9808
Epoch 00042: loss did not improve from 0.47203
13000/13000 [==============================] - 180s 14ms/step - loss: 0.8169 - jaccard_score: 0.7152 - binary_accuracy: 0.9808
Epoch 43/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5764 - jaccard_score: 0.7411 - binary_accuracy: 0.9858
Epoch 00043: loss did not improve from 0.47203
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5762 - jaccard_score: 0.7412 - binary_accuracy: 0.9858
Epoch 44/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5102 - jaccard_score: 0.7551 - binary_accuracy: 0.9873
Epoch 00044: loss did not improve from 0.47203
13000/13000 [==============================] - 180s 14ms/step - loss: 0.5100 - jaccard_score: 0.7550 - binary_accuracy: 0.9873
Epoch 45/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.5686 - jaccard_score: 0.7513 - binary_accuracy: 0.9861
Epoch 00045: loss did not improve from 0.47203
13000/13000 [==============================] - 181s 14ms/step - loss: 0.5685 - jaccard_score: 0.7513 - binary_accuracy: 0.9861
Epoch 46/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.6043 - jaccard_score: 0.7329 - binary_accuracy: 0.9851
Epoch 00046: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.

Epoch 00046: loss did not improve from 0.47203
13000/13000 [==============================] - 181s 14ms/step - loss: 0.6042 - jaccard_score: 0.7330 - binary_accuracy: 0.9851
Epoch 47/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4525 - jaccard_score: 0.7711 - binary_accuracy: 0.9886
Epoch 00047: loss improved from 0.47203 to 0.45232, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.45.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4523 - jaccard_score: 0.7711 - binary_accuracy: 0.9886
Epoch 48/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4214 - jaccard_score: 0.7843 - binary_accuracy: 0.9894
Epoch 00048: loss improved from 0.45232 to 0.42124, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.42.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4212 - jaccard_score: 0.7843 - binary_accuracy: 0.9894
Epoch 49/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4132 - jaccard_score: 0.7869 - binary_accuracy: 0.9896
Epoch 00049: loss improved from 0.42124 to 0.41311, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.41.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4131 - jaccard_score: 0.7868 - binary_accuracy: 0.9896
Epoch 50/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4075 - jaccard_score: 0.7899 - binary_accuracy: 0.9897
Epoch 00050: loss improved from 0.41311 to 0.40736, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.41.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4074 - jaccard_score: 0.7898 - binary_accuracy: 0.9897
Epoch 51/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4324 - jaccard_score: 0.7820 - binary_accuracy: 0.9891
Epoch 00051: loss did not improve from 0.40736
13000/13000 [==============================] - 181s 14ms/step - loss: 0.4324 - jaccard_score: 0.7821 - binary_accuracy: 0.9891
Epoch 52/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4073 - jaccard_score: 0.7917 - binary_accuracy: 0.9897
Epoch 00052: loss improved from 0.40736 to 0.40729, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.41.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4073 - jaccard_score: 0.7918 - binary_accuracy: 0.9897
Epoch 53/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3796 - jaccard_score: 0.8014 - binary_accuracy: 0.9903
Epoch 00053: loss improved from 0.40729 to 0.37948, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.38.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3795 - jaccard_score: 0.8014 - binary_accuracy: 0.9903
Epoch 54/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3792 - jaccard_score: 0.8014 - binary_accuracy: 0.9903
Epoch 00054: loss improved from 0.37948 to 0.37914, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.38.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3791 - jaccard_score: 0.8014 - binary_accuracy: 0.9903
Epoch 55/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3737 - jaccard_score: 0.8029 - binary_accuracy: 0.9905
Epoch 00055: loss improved from 0.37914 to 0.37362, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.37.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3736 - jaccard_score: 0.8029 - binary_accuracy: 0.9905
Epoch 56/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3786 - jaccard_score: 0.8026 - binary_accuracy: 0.9904
Epoch 00056: loss did not improve from 0.37362
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3785 - jaccard_score: 0.8027 - binary_accuracy: 0.9904
Epoch 57/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3811 - jaccard_score: 0.7998 - binary_accuracy: 0.9903
Epoch 00057: loss did not improve from 0.37362
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3809 - jaccard_score: 0.7999 - binary_accuracy: 0.9903
Epoch 58/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3652 - jaccard_score: 0.8071 - binary_accuracy: 0.9907
Epoch 00058: loss improved from 0.37362 to 0.36523, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.37.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3652 - jaccard_score: 0.8072 - binary_accuracy: 0.9907
Epoch 59/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3967 - jaccard_score: 0.7951 - binary_accuracy: 0.9900
Epoch 00059: loss did not improve from 0.36523
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3965 - jaccard_score: 0.7951 - binary_accuracy: 0.9900
Epoch 60/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3464 - jaccard_score: 0.8155 - binary_accuracy: 0.9911
Epoch 00060: loss improved from 0.36523 to 0.34631, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.35.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3463 - jaccard_score: 0.8155 - binary_accuracy: 0.9911
Epoch 61/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3433 - jaccard_score: 0.8151 - binary_accuracy: 0.9912
Epoch 00061: loss improved from 0.34631 to 0.34318, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.34.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3432 - jaccard_score: 0.8151 - binary_accuracy: 0.9912
Epoch 62/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3447 - jaccard_score: 0.8159 - binary_accuracy: 0.9912
Epoch 00062: loss did not improve from 0.34318
13000/13000 [==============================] - 181s 14ms/step - loss: 0.3446 - jaccard_score: 0.8159 - binary_accuracy: 0.9912
Epoch 63/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3328 - jaccard_score: 0.8247 - binary_accuracy: 0.9914
Epoch 00063: loss improved from 0.34318 to 0.33271, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.33.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3327 - jaccard_score: 0.8247 - binary_accuracy: 0.9914
Epoch 64/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.4223 - jaccard_score: 0.7945 - binary_accuracy: 0.9894
Epoch 00064: loss did not improve from 0.33271
13000/13000 [==============================] - 180s 14ms/step - loss: 0.4221 - jaccard_score: 0.7945 - binary_accuracy: 0.9894
Epoch 65/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3387 - jaccard_score: 0.8178 - binary_accuracy: 0.9913
Epoch 00065: loss did not improve from 0.33271
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3386 - jaccard_score: 0.8179 - binary_accuracy: 0.9913
Epoch 66/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3243 - jaccard_score: 0.8285 - binary_accuracy: 0.9916
Epoch 00066: loss improved from 0.33271 to 0.32412, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.32.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3241 - jaccard_score: 0.8284 - binary_accuracy: 0.9916
Epoch 67/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3173 - jaccard_score: 0.8297 - binary_accuracy: 0.9918
Epoch 00067: loss improved from 0.32412 to 0.31728, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.32.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3173 - jaccard_score: 0.8298 - binary_accuracy: 0.9918
Epoch 68/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3183 - jaccard_score: 0.8288 - binary_accuracy: 0.9918
Epoch 00068: loss did not improve from 0.31728
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3182 - jaccard_score: 0.8288 - binary_accuracy: 0.9918
Epoch 69/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3586 - jaccard_score: 0.8077 - binary_accuracy: 0.9908
Epoch 00069: loss did not improve from 0.31728
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3585 - jaccard_score: 0.8077 - binary_accuracy: 0.9908
Epoch 70/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3184 - jaccard_score: 0.8274 - binary_accuracy: 0.9918
Epoch 00070: loss did not improve from 0.31728
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3183 - jaccard_score: 0.8274 - binary_accuracy: 0.9918
Epoch 71/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3090 - jaccard_score: 0.8336 - binary_accuracy: 0.9920
Epoch 00071: loss improved from 0.31728 to 0.30888, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.31.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.3089 - jaccard_score: 0.8337 - binary_accuracy: 0.9920
Epoch 72/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3102 - jaccard_score: 0.8341 - binary_accuracy: 0.9920
Epoch 00072: loss did not improve from 0.30888
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3101 - jaccard_score: 0.8342 - binary_accuracy: 0.9920
Epoch 73/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3863 - jaccard_score: 0.8033 - binary_accuracy: 0.9903
Epoch 00073: loss did not improve from 0.30888
13000/13000 [==============================] - 181s 14ms/step - loss: 0.3862 - jaccard_score: 0.8034 - binary_accuracy: 0.9903
Epoch 74/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3085 - jaccard_score: 0.8344 - binary_accuracy: 0.9920
Epoch 00074: loss improved from 0.30888 to 0.30844, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.31.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3084 - jaccard_score: 0.8343 - binary_accuracy: 0.9920
Epoch 75/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3095 - jaccard_score: 0.8328 - binary_accuracy: 0.9920
Epoch 00075: loss did not improve from 0.30844
13000/13000 [==============================] - 181s 14ms/step - loss: 0.3094 - jaccard_score: 0.8326 - binary_accuracy: 0.9920
Epoch 76/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2946 - jaccard_score: 0.8433 - binary_accuracy: 0.9924
Epoch 00076: loss improved from 0.30844 to 0.29457, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.29.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2946 - jaccard_score: 0.8432 - binary_accuracy: 0.9924
Epoch 77/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3506 - jaccard_score: 0.8171 - binary_accuracy: 0.9911
Epoch 00077: loss did not improve from 0.29457
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3506 - jaccard_score: 0.8171 - binary_accuracy: 0.9911
Epoch 78/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3135 - jaccard_score: 0.8280 - binary_accuracy: 0.9919
Epoch 00078: loss did not improve from 0.29457
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3133 - jaccard_score: 0.8279 - binary_accuracy: 0.9919
Epoch 79/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2967 - jaccard_score: 0.8417 - binary_accuracy: 0.9923
Epoch 00079: loss did not improve from 0.29457
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2965 - jaccard_score: 0.8417 - binary_accuracy: 0.9923
Epoch 80/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2871 - jaccard_score: 0.8469 - binary_accuracy: 0.9926
Epoch 00080: loss improved from 0.29457 to 0.28708, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.29.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2871 - jaccard_score: 0.8469 - binary_accuracy: 0.9926
Epoch 81/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3240 - jaccard_score: 0.8235 - binary_accuracy: 0.9917
Epoch 00081: loss did not improve from 0.28708
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3239 - jaccard_score: 0.8234 - binary_accuracy: 0.9917
Epoch 82/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3196 - jaccard_score: 0.8193 - binary_accuracy: 0.9918
Epoch 00082: loss did not improve from 0.28708
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3194 - jaccard_score: 0.8191 - binary_accuracy: 0.9918
Epoch 83/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2929 - jaccard_score: 0.8370 - binary_accuracy: 0.9924
Epoch 00083: loss did not improve from 0.28708
13000/13000 [==============================] - 179s 14ms/step - loss: 0.2929 - jaccard_score: 0.8370 - binary_accuracy: 0.9924
Epoch 84/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3773 - jaccard_score: 0.8119 - binary_accuracy: 0.9906
Epoch 00084: loss did not improve from 0.28708
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3772 - jaccard_score: 0.8119 - binary_accuracy: 0.9906
Epoch 85/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.3202 - jaccard_score: 0.8234 - binary_accuracy: 0.9917
Epoch 00085: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.

Epoch 00085: loss did not improve from 0.28708
13000/13000 [==============================] - 180s 14ms/step - loss: 0.3201 - jaccard_score: 0.8233 - binary_accuracy: 0.9917
Epoch 86/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2787 - jaccard_score: 0.8520 - binary_accuracy: 0.9928
Epoch 00086: loss improved from 0.28708 to 0.27855, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.28.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2785 - jaccard_score: 0.8519 - binary_accuracy: 0.9928
Epoch 87/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2735 - jaccard_score: 0.8548 - binary_accuracy: 0.9929
Epoch 00087: loss improved from 0.27855 to 0.27340, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.27.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2734 - jaccard_score: 0.8548 - binary_accuracy: 0.9929
Epoch 88/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2649 - jaccard_score: 0.8613 - binary_accuracy: 0.9931
Epoch 00088: loss improved from 0.27340 to 0.26478, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.26.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.2648 - jaccard_score: 0.8613 - binary_accuracy: 0.9931
Epoch 89/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2625 - jaccard_score: 0.8604 - binary_accuracy: 0.9932
Epoch 00089: loss improved from 0.26478 to 0.26243, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.26.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2624 - jaccard_score: 0.8604 - binary_accuracy: 0.9932
Epoch 90/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2615 - jaccard_score: 0.8606 - binary_accuracy: 0.9932
Epoch 00090: loss improved from 0.26243 to 0.26138, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.26.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2614 - jaccard_score: 0.8607 - binary_accuracy: 0.9932
Epoch 91/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2549 - jaccard_score: 0.8653 - binary_accuracy: 0.9934
Epoch 00091: loss improved from 0.26138 to 0.25491, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.25.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.2549 - jaccard_score: 0.8653 - binary_accuracy: 0.9934
Epoch 92/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2578 - jaccard_score: 0.8605 - binary_accuracy: 0.9933
Epoch 00092: loss did not improve from 0.25491
13000/13000 [==============================] - 182s 14ms/step - loss: 0.2577 - jaccard_score: 0.8604 - binary_accuracy: 0.9933
Epoch 93/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2502 - jaccard_score: 0.8685 - binary_accuracy: 0.9935
Epoch 00093: loss improved from 0.25491 to 0.25007, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.25.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2501 - jaccard_score: 0.8685 - binary_accuracy: 0.9935
Epoch 94/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2575 - jaccard_score: 0.8628 - binary_accuracy: 0.9933
Epoch 00094: loss did not improve from 0.25007
13000/13000 [==============================] - 179s 14ms/step - loss: 0.2574 - jaccard_score: 0.8629 - binary_accuracy: 0.9933
Epoch 95/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2446 - jaccard_score: 0.8727 - binary_accuracy: 0.9937
Epoch 00095: loss improved from 0.25007 to 0.24449, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.24.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.2445 - jaccard_score: 0.8727 - binary_accuracy: 0.9937
Epoch 96/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2419 - jaccard_score: 0.8736 - binary_accuracy: 0.9937
Epoch 00096: loss improved from 0.24449 to 0.24188, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.24.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.2419 - jaccard_score: 0.8737 - binary_accuracy: 0.9937
Epoch 97/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2421 - jaccard_score: 0.8693 - binary_accuracy: 0.9937
Epoch 00097: loss did not improve from 0.24188
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2420 - jaccard_score: 0.8693 - binary_accuracy: 0.9937
Epoch 98/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2396 - jaccard_score: 0.8727 - binary_accuracy: 0.9938
Epoch 00098: loss improved from 0.24188 to 0.23952, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.24.h5
13000/13000 [==============================] - 181s 14ms/step - loss: 0.2395 - jaccard_score: 0.8727 - binary_accuracy: 0.9938
Epoch 99/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2404 - jaccard_score: 0.8745 - binary_accuracy: 0.9938
Epoch 00099: loss did not improve from 0.23952
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2403 - jaccard_score: 0.8745 - binary_accuracy: 0.9938
Epoch 100/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2371 - jaccard_score: 0.8761 - binary_accuracy: 0.9939
Epoch 00100: loss improved from 0.23952 to 0.23699, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.24.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2370 - jaccard_score: 0.8761 - binary_accuracy: 0.9939
Epoch 101/700
12992/13000 [============================>.] - ETA: 0s - loss: 0.2322 - jaccard_score: 0.8783 - binary_accuracy: 0.9940
Epoch 00101: loss improved from 0.23699 to 0.23214, saving model to /home/sergey_sintsov/dubai/workspace_2019-06-26-07-35-58/checkpoints/2019-06-26-08-44-13/model-0.23.h5
13000/13000 [==============================] - 180s 14ms/step - loss: 0.2321 - jaccard_score: 0.8784 - binary_accuracy: 0.9940
Epoch 102/700
 9488/13000 [====================>.........] - ETA: 48s - loss: 0.2316 - jaccard_score: 0.8849 - binary_accuracy: 0.9940
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-68-ff24a3be4edb> in <module>
----> 1 train(unet, X_preprocessed, Y, workspace_dir, epochs=700, batch_sz=16)

<ipython-input-66-a7351a2cf608> in train(model, X_train, Y_train, workspace_dir, epochs, batch_sz)
     54         epochs=epochs,
     55         callbacks=callbacks,
---> 56         shuffle=True
     57     )

/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1603           initial_epoch=initial_epoch,
   1604           steps_per_epoch=steps_per_epoch,
-> 1605           validation_steps=validation_steps)
   1606 
   1607   def evaluate(self,

/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/training_arrays.py in fit_loop(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps)
    212           ins_batch[i] = ins_batch[i].toarray()
    213 
--> 214         outs = f(ins_batch)
    215         if not isinstance(outs, list):
    216           outs = [outs]

/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/backend.py in __call__(self, inputs)
   2976 
   2977     fetched = self._callable_fn(*array_vals,
-> 2978                                 run_metadata=self.run_metadata)
   2979     self._call_fetch_callbacks(fetched[-len(self._fetches):])
   2980     return fetched[:len(self.outputs)]

/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
   1397           ret = tf_session.TF_SessionRunCallable(
   1398               self._session._session, self._handle, args, status,
-> 1399               run_metadata_ptr)
   1400         if run_metadata:
   1401           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 

3 Check the model

3.1 Load trained model

In [69]:
import json

# Best checkpoint saved by the training run; the architecture is stored
# next to the weights as a JSON graph definition.
final_model_path = workspace_dir / 'checkpoints' / '2019-06-26-08-44-13' / 'model-0.23.h5'

# Rebuild the network topology from JSON, then load the matching weights.
with open(str(final_model_path.parent / 'graph.json'), 'r') as graph_file:
    fitted_model = model_from_json(graph_file.read())

# by_name=True maps weights onto layers by layer name rather than order.
fitted_model.load_weights(str(final_model_path), by_name=True)
In [71]:
fitted_model.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_3 (InputLayer)            (None, 256, 256, 3)  0                                            
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 256, 256, 32) 896         input_3[0][0]                    
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 256, 256, 32) 9248        conv2d_48[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_8 (MaxPooling2D)  (None, 128, 128, 32) 0           conv2d_49[0][0]                  
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 128, 128, 64) 18496       max_pooling2d_8[0][0]            
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 128, 128, 64) 36928       conv2d_50[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_9 (MaxPooling2D)  (None, 64, 64, 64)   0           conv2d_51[0][0]                  
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 64, 64, 128)  73856       max_pooling2d_9[0][0]            
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 64, 64, 128)  147584      conv2d_52[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_10 (MaxPooling2D) (None, 32, 32, 128)  0           conv2d_53[0][0]                  
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 32, 32, 256)  295168      max_pooling2d_10[0][0]           
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 32, 32, 256)  590080      conv2d_54[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_11 (MaxPooling2D) (None, 16, 16, 256)  0           conv2d_55[0][0]                  
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 16, 16, 512)  1180160     max_pooling2d_11[0][0]           
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 16, 16, 512)  2359808     conv2d_56[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_8 (UpSampling2D)  (None, 32, 32, 512)  0           conv2d_57[0][0]                  
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 32, 32, 256)  524544      up_sampling2d_8[0][0]            
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 32, 32, 512)  0           conv2d_55[0][0]                  
                                                                 conv2d_58[0][0]                  
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 32, 32, 256)  1179904     concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 32, 32, 256)  590080      conv2d_59[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_9 (UpSampling2D)  (None, 64, 64, 256)  0           conv2d_60[0][0]                  
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 64, 64, 128)  131200      up_sampling2d_9[0][0]            
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 64, 64, 256)  0           conv2d_53[0][0]                  
                                                                 conv2d_61[0][0]                  
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 64, 64, 128)  295040      concatenate_9[0][0]              
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 64, 64, 128)  147584      conv2d_62[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_10 (UpSampling2D) (None, 128, 128, 128 0           conv2d_63[0][0]                  
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 128, 128, 64) 32832       up_sampling2d_10[0][0]           
__________________________________________________________________________________________________
concatenate_10 (Concatenate)    (None, 128, 128, 128 0           conv2d_51[0][0]                  
                                                                 conv2d_64[0][0]                  
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 128, 128, 64) 73792       concatenate_10[0][0]             
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 128, 128, 64) 36928       conv2d_65[0][0]                  
__________________________________________________________________________________________________
up_sampling2d_11 (UpSampling2D) (None, 256, 256, 64) 0           conv2d_66[0][0]                  
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 256, 256, 32) 8224        up_sampling2d_11[0][0]           
__________________________________________________________________________________________________
concatenate_11 (Concatenate)    (None, 256, 256, 64) 0           conv2d_49[0][0]                  
                                                                 conv2d_67[0][0]                  
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 256, 256, 32) 18464       concatenate_11[0][0]             
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 256, 256, 32) 9248        conv2d_68[0][0]                  
__________________________________________________________________________________________________
spatial_dropout2d_2 (SpatialDro (None, 256, 256, 32) 0           conv2d_69[0][0]                  
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 256, 256, 1)  289         spatial_dropout2d_2[0][0]        
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 256, 256, 1)  2           conv2d_70[0][0]                  
==================================================================================================
Total params: 7,760,355
Trainable params: 7,760,355
Non-trainable params: 0
__________________________________________________________________________________________________

3.2 Use test images

In [73]:
X_test = []
Y_test = []

# Cut 10 random tiles from each source map/mask pair to form a small
# evaluation set; tiles are kept in memory only (save_tiles=False).
for idx, source in enumerate(src_images_and_masks):
    tiles, tile_masks = cut_map_into_tiles(
        source['map'],
        source['mask'],
        tile_size=TILES_SIZE,
        tile_resize=UNET_INPUT_SIZE,
        tiles_count=10,
        tile_prefix=str(idx),
        save_tiles=False
    )

    X_test.extend(tiles)
    Y_test.extend(tile_masks)
    print('done', idx)
done 0
done 1
done 2
done 3
done 4
done 5
done 6
done 7
done 8
done 9
done 10
done 11
done 12

3.3 Run inference

In [84]:
def convert_grayscale_data_to_red_rgba(mask, alpha_value=50):
    """Render a 2-D mask as a translucent red RGBA image.

    The mask (assumed to hold values in [0, 1] — confirm against the model's
    sigmoid output) drives both the red channel (scaled to 0-255) and the
    alpha channel (scaled to 0-alpha_value); green and blue stay zero, so
    higher mask values appear as stronger, more opaque red.
    """
    red = (mask * 255).astype('uint8')
    alpha = (mask * alpha_value).astype('uint8')
    zeros = np.zeros_like(red)

    # Stack R, G, B, A planes depth-wise into an (H, W, 4) uint8 array.
    rgba_array = np.dstack((red, zeros, zeros, alpha))
    return Image.fromarray(rgba_array, 'RGBA')

def apply_predicted_mask(orig_image, predicted_2d_values):
    """Blend the predicted mask over a tile as a translucent red overlay.

    orig_image: array-like of uint8-convertible RGB pixel values.
    predicted_2d_values: 2-D array of per-pixel mask values.
    Returns a PIL RGB image with the overlay composited in.
    """
    overlay = convert_grayscale_data_to_red_rgba(predicted_2d_values)

    base = Image.fromarray(orig_image.astype('uint8'), 'RGB').convert('RGBA')
    # Passing the overlay as the third argument makes paste() use its
    # alpha channel as the blend mask.
    base.paste(overlay, (0, 0), overlay)

    return base.convert('RGB')
In [85]:
# Visual sanity check: push each test tile through the network and show the
# predicted mask blended over the original tile (ground-truth mask unused here).
for tile, _truth in zip(X_test, Y_test):
    tile_np = preprocess_inputs(np.array(tile))
    # Model expects a batch axis; predict on a batch of one.
    prediction = fitted_model.predict(tile_np[np.newaxis, :, :, :])
    # Squeeze (1, H, W, 1) down to a plain (H, W) mask.
    prediction_2d = prediction.reshape(prediction.shape[1], prediction.shape[2])

    display(apply_predicted_mask(tile, prediction_2d))
In [ ]: